func runtime.alignUp
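
alignUp rounds n up to the next multiple of the alignment a, where a must be a power of two. A minimal sketch of the body, consistent with the signature shown at stubs.go#L424 below (the standard power-of-two rounding idiom; confirm against the actual source):

	// alignUp rounds n up to a multiple of a. a must be a power of 2.
	func alignUp(n, a uintptr) uintptr {
		// Adding a-1 carries n past the next boundary unless n is
		// already aligned; clearing the low bits snaps back down.
		return (n + a - 1) &^ (a - 1)
	}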

39 uses

	runtime (current package)
		cgocall.go#L456: 		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
		malloc.go#L588: 		p = alignUp(p+(256<<10), heapArenaBytes)
		malloc.go#L640: 	n = alignUp(n, heapArenaBytes)
		malloc.go#L838: 		p = alignUp(p, align)
		malloc.go#L852: 		pAligned := alignUp(p, align)
		malloc.go#L1105: 				off = alignUp(off, 8)
		malloc.go#L1113: 				off = alignUp(off, 8)
		malloc.go#L1115: 				off = alignUp(off, 4)
		malloc.go#L1117: 				off = alignUp(off, 2)
		malloc.go#L1602: 	persistent.off = alignUp(persistent.off, align)
		malloc.go#L1620: 		persistent.off = alignUp(goarch.PtrSize, align)
		malloc.go#L1679: 	p := alignUp(l.next, align)
		malloc.go#L1684: 	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		mbitmap.go#L723: 			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
		mbitmap.go#L725: 			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
		mbitmap.go#L726: 			npages := alignUp(spaceNeeded, pageSize) / pageSize
		mbitmap.go#L761: 		off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
		mem_linux.go#L100: 		beg := alignUp(uintptr(v), physHugePageSize)
		mfinal.go#L504: 		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
		mfinal.go#L506: 	nret = alignUp(nret, goarch.PtrSize)
		mgcscavenge.go#L910: 		max = alignUp(max, minimum)
		mgcscavenge.go#L970: 		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
		mheap.go#L1235: 		base = alignUp(base, physPageSize)
		mheap.go#L1478: 	ask := alignUp(npage, pallocChunkPages) * pageSize
		mheap.go#L1484: 	nBase := alignUp(end, physPageSize)
		mheap.go#L1527: 		nBase = alignUp(h.curArena.base+ask, physPageSize)
		mpagealloc.go#L177: 	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
		mpagealloc.go#L362: 	limit := alignUp(base+size, pallocChunkBytes)
		mpagealloc_64bit.go#L78: 		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		mpagealloc_64bit.go#L119: 		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		mpagealloc_64bit.go#L211: 	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
		mranges.go#L77: 	base := alignUp(a.base.addr(), uintptr(align)) + len
		pinner.go#L302: 	bytes := alignUp(s.pinnerBitSize(), 8)
		proc.go#L5015: 	totalSize = alignUp(totalSize, sys.StackAlign)
		stack.go#L352: 		n = uint32(alignUp(uintptr(n), physPageSize))
		stkframe.go#L284: 		off:       -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
		stubs.go#L424: func alignUp(n, a uintptr) uintptr {
		traceback.go#L503: 		frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
		traceregion.go#L44: 	n = alignUp(n, 8)
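
For illustration, a self-contained, hypothetical snippet (not part of the runtime) showing the rounding behavior the call sites above rely on, e.g. bumping an offset to an 8-byte boundary as in malloc.go#L1602:

	package main

	import "fmt"

	// alignUp as sketched above: round n up to a multiple of a (a power of 2).
	func alignUp(n, a uintptr) uintptr {
		return (n + a - 1) &^ (a - 1)
	}

	func main() {
		for _, n := range []uintptr{0, 1, 7, 8, 17} {
			fmt.Printf("alignUp(%d, 8) = %d\n", n, alignUp(n, 8))
		}
		// Prints: 0, 8, 8, 8, 24 — values already on a boundary are
		// unchanged; everything else rounds up to the next multiple.
	}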